/* Magic number indicating a Multiboot header. */
.long 0x1BADB002
/* Flags to bootloader (see Multiboot spec). */
- .long 0x00000006
+ .long 0x00000002
/* Checksum: must be the negated sum of the first two fields. */
- .long -0x1BADB008
- /* Unused loader addresses (ELF header has all this already). */
- .long 0,0,0,0,0
- /* EGA text mode. */
- .long 1,0,0,0
+ .long -0x1BADB004
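/*
 * Editorial sketch (not part of the patch): flags drop from 0x6 to 0x2
 * because the Multiboot video-mode request (bit 2) is no longer set,
 * which is why the EGA mode fields above are deleted (the unused address
 * fields go with them). The checksum must track this, since the spec
 * requires magic + flags + checksum to wrap to zero in 32-bit
 * arithmetic. A stand-alone check of the arithmetic:
 */
#include <assert.h>
#include <stdint.h>

static void check_multiboot_header(void)
{
    const uint32_t magic    = 0x1BADB002;
    const uint32_t flags    = 0x00000002;
    const uint32_t checksum = -(magic + flags);   /* == -0x1BADB004 */

    /* Unsigned overflow wraps, so the three fields cancel exactly. */
    assert(magic + flags + checksum == 0);
}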
hal_entry:
/* Set up a few descriptors: on entry only CS is guaranteed good. */
mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
page = frame_table + frames[i];
- page->flags &= ~PG_type_mask;
+ page->flags &= ~(PG_type_mask | PG_need_flush);
page->flags |= PGT_gdt_page;
get_page_type(page);
get_page_tot(page);
}
- flush_tlb();
+ local_flush_tlb();
/* Copy over first entries of the new GDT. */
memcpy((void *)GDT_VIRT_START, gdt_table, FIRST_DOMAIN_GDT_ENTRY*8);
spin_unlock(&tlbstate_lock);
}
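/*
 * Editorial note (not in the patch): clearing PG_need_flush inside the
 * retype loop above looks premature, but is presumably safe because the
 * unconditional local_flush_tlb() that follows runs before the frames
 * are ever used as a GDT, wiping any stale writeable mapping in one go.
 */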
-void flush_tlb_current_task(void)
-{
-#if 0
- struct mm_struct *mm = &current->mm;
- unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
-
- local_flush_tlb();
- if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-#endif
-}
-
-void flush_tlb_mm (struct mm_struct * mm)
-{
-#if 0
- unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
-
- if (current->active_mm == mm)
- local_flush_tlb();
- if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-#endif
-}
-
-#if 0
-void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
-
- if (current->active_mm == mm) {
- if(current->mm)
- __flush_tlb_one(va);
- else
- leave_mm(smp_processor_id());
- }
-
- if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, va);
-}
-#endif
-
static inline void do_flush_tlb_all_local(void)
{
unsigned long cpu = smp_processor_id();
#include <xeno/delay.h>
#include <xeno/spinlock.h>
#include <xeno/irq.h>
+#include <xeno/perfc.h>
#include <asm/domain_page.h>
#include <asm/system.h>
#include <asm/io.h>
goto unlock_and_bounce_fault;
unmap_domain_mem(ldt_page);
+ if ( page->flags & PG_need_flush )
+ {
+ perfc_incrc(need_flush_tlb_flush);
+ local_flush_tlb();
+ page->flags &= ~PG_need_flush;
+ }
+
page->flags &= ~PG_type_mask;
page->flags |= PGT_ldt_page;
}
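/*
 * Editorial sketch: this test-flush-clear sequence recurs wherever a
 * frame changes type in this patch (compare the later hunk that marks
 * flush_tlb[smp_processor_id()] instead of flushing at once). A
 * hypothetical helper built only from primitives already visible here:
 */
static inline void flush_if_needed(struct pfn_info *page)
{
    if ( page->flags & PG_need_flush )
    {
        /* A stale writeable mapping of this frame may still be cached. */
        perfc_incrc(need_flush_tlb_flush);
        local_flush_tlb();
        page->flags &= ~PG_need_flush;
    }
}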
#include <xeno/lib.h>
#include <xeno/mm.h>
#include <xeno/dom_mem_ops.h>
+#include <xeno/perfc.h>
#include <xeno/sched.h>
#include <xeno/event.h>
#include <asm/domain_page.h>
unsigned long i;
unsigned long flags;
long rc = 0;
+ int need_flush = 0;
spin_lock_irqsave(&free_list_lock, flags);
spin_lock(&p->page_lock);
goto out;
}
+ need_flush |= pf->flags & PG_need_flush;
+
pf->flags = 0;
list_del(&pf->list);
spin_unlock(&p->page_lock);
spin_unlock_irqrestore(&free_list_lock, flags);
+ if ( need_flush )
+ {
+ __flush_tlb();
+ perfc_incrc(need_flush_tlb_flush);
+ }
+
return rc ? rc : bop.size;
}
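/*
 * Editorial sketch: the loop above only accumulates PG_need_flush; the
 * flush itself happens once on the exit path, so freeing many frames
 * costs at most one TLB flush per hypercall. The general shape, with
 * hypothetical names:
 */
static void free_frames_batched(struct pfn_info *pf[], int n)
{
    int i, need_flush = 0;

    for ( i = 0; i < n; i++ )
    {
        need_flush |= pf[i]->flags & PG_need_flush;
        pf[i]->flags = 0;           /* frame leaves the domain */
    }

    if ( need_flush )
        __flush_tlb();              /* one flush covers every freed frame */
}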
do { if ( (*cache & READY_FOR_TLB_FLUSH) ) *cache = 0; }
while ( ((unsigned long)(++cache) & ~PAGE_MASK) != 0 );
- perfc_incr(domain_page_tlb_flush);
+ perfc_incrc(domain_page_tlb_flush);
local_flush_tlb();
}
/*
* THE FOLLOWING ARE ISSUES IF GUEST OPERATING SYSTEMS BECOME SMP-CAPABLE.
- * [THAT IS, THEY'RE NOT A PROBLEM NOW, AND MAY NOT EVER BE.]
* -----------------------------------------------------------------------
*
* *********
* than one, we'd probably just flush on all processors running the domain.
* *********
*
- * ** 1 **
* The problem involves creating new page tables which might be mapped
* writeable in the TLB of another processor. As an example, a domain might be
* running in two contexts (ie. on two processors) simultaneously, using the
* FLUSH_NONE, FLUSH_PAGETABLE, FLUSH_DOMAIN. A flush reduces this
* to FLUSH_NONE, while squashed write mappings can only promote up
* to more aggressive flush types.
- *
- * ** 2 **
- * Same problem occurs when removing a page table, at level 1 say, then
- * making it writeable. Need a TLB flush between otherwise another processor
- * might write an illegal mapping into the old table, while yet another
- * processor can use the illegal mapping because of a stale level-2 TLB
- * entry. So, removal of a table reference sets 'flush_level' appropriately,
- * and a flush occurs on next addition of a fresh write mapping.
- *
- * BETTER SOLUTION FOR BOTH 1 AND 2:
- * When type_refcnt goes to zero, leave old type in place (don't set to
- * PGT_none). Then, only flush if making a page table of a page with
- * (cnt=0,type=PGT_writeable), or when adding a write mapping for a page
- * with (cnt=0, type=PGT_pagexxx). A TLB flush will cause all pages
- * with refcnt==0 to be reset to PGT_none. Need an array for the purpose,
- * added to when a type_refcnt goes to zero, and emptied on a TLB flush.
- * Either have per-domain table, or force TLB flush at end of each
- * call to 'process_page_updates'.
- * Most OSes will always keep a writeable reference hanging around, and
- * page table structure is fairly static, so this mechanism should be
- * fairly cheap.
- *
- * MAYBE EVEN BETTER? [somewhat dubious: not for first cut of the code]:
- * If we need to force an intermediate flush, those other processors
- * spin until we complete, then do a single TLB flush. They can spin on
- * the lock protecting 'process_page_updates', and continue when that
- * is freed. Saves cost of setting up and servicing an IPI: later
- * communication is synchronous. Processors trying to install the domain
- * or domain&pagetable would also enter the spin.
- *
- * ** 3 **
- * Indeed, this problem generalises to reusing page tables at different
- * levels of the hierarchy (conceptually, the guest OS can use the
- * hypervisor to introduce illegal table entries by proxy). Consider
- * unlinking a level-1 page table and reintroducing at level 2 with no
- * TLB flush. Hypervisor can add a reference to some other level-1 table
- * with the RW bit set. This is fine in the level-2 context, but some
- * other processor may still be using that table in level-1 context
- * (due to a stale TLB entry). At level 1 it may look like the
- * processor has write access to the other level-1 page table! Therefore
- * can add illegal values there with impunity :-(
- *
- * Fortunately, the solution above generalises to this extended problem.
*/
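/*
 * Editorial sketch of the promotion rule described above (the enum
 * values are hypothetical; only their ordering matters): a pending
 * flush level can only ratchet upwards, and only an actual flush
 * resets it.
 */
enum flush_level { FLUSH_NONE = 0, FLUSH_PAGETABLE = 1, FLUSH_DOMAIN = 2 };

static enum flush_level pending_flush = FLUSH_NONE;

static void promote_flush(enum flush_level needed)
{
    if ( needed > pending_flush )
        pending_flush = needed;     /* squashed write mappings promote */
}

static void do_pending_flush(void)
{
    local_flush_tlb();
    pending_flush = FLUSH_NONE;     /* a flush reduces this to NONE */
}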
-/*
- * UPDATE 12.11.02.: We no longer have struct page and mem_map. These
- * have been replaced by struct pfn_info and frame_table respectively.
- *
- * system_free_list is a list_head linking all system owned free pages.
- * it is initialized in init_frametable.
- *
- * Boris Dragovic.
- */
-
#include <xeno/config.h>
#include <xeno/init.h>
#include <xeno/lib.h>
#include <xeno/mm.h>
#include <xeno/sched.h>
#include <xeno/errno.h>
+#include <xeno/perfc.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
#include <asm/io.h>
return -1;
}
+ if ( flags & PG_need_flush )
+ {
+ flush_tlb[smp_processor_id()] = 1;
+ page->flags &= ~PG_need_flush;
+ perfc_incrc(need_flush_tlb_flush);
+ }
+
page->flags &= ~PG_type_mask;
page->flags |= type;
}
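/*
 * Editorial sketch: unlike the LDT path earlier, this hunk defers the
 * flush by marking the per-CPU flush_tlb[] slot. Some later exit path
 * (its location is outside this hunk, so this is an assumption) must
 * honour the mark before returning to the guest:
 */
static void finish_deferred_flush(void)
{
    int cpu = smp_processor_id();

    if ( flush_tlb[cpu] )
    {
        flush_tlb[cpu] = 0;
        local_flush_tlb();
    }
}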
((page->flags & PG_need_flush) == PG_need_flush)));
if ( writeable )
{
- if ( put_page_type(page) == 0 )
- {
- flush_tlb[smp_processor_id()] = 1;
- page->flags &= ~PG_need_flush;
- }
+ put_page_type(page);
}
else if ( unlikely(((page->flags & PG_type_mask) == PGT_ldt_page) &&
(page_type_count(page) != 0)) )
void perfc_printall(u_char key, void *dev_id, struct pt_regs *regs)
{
- int i, j;
+ int i, j, sum;
s_time_t now = NOW();
atomic_t *counters = (atomic_t *)&perfcounters;
for ( i = 0; i < NR_PERFCTRS; i++ )
{
- printk("%20s ", perfc_info[i].name);
+ printk("%-32s ", perfc_info[i].name);
switch ( perfc_info[i].type )
{
case TYPE_SINGLE:
- printk("%10d 0x%08x",
- atomic_read(&counters[0]),
- atomic_read(&counters[0]));
+ printk("TOTAL[%10d]", atomic_read(&counters[0]));
counters += 1;
break;
case TYPE_CPU:
+ for ( j = sum = 0; j < smp_num_cpus; j++ )
+ sum += atomic_read(&counters[j]);
+ printk("TOTAL[%10d] ", sum);
for ( j = 0; j < smp_num_cpus; j++ )
- printk("CPU%02d[%10d 0x%08x] ",
- j, atomic_read(&counters[j]),
- atomic_read(&counters[j]));
+ printk("CPU%02d[%10d] ", j, atomic_read(&counters[j]));
counters += NR_CPUS;
break;
case TYPE_ARRAY:
+ for ( j = sum = 0; j < perfc_info[i].nr_elements; j++ )
+ sum += atomic_read(&counters[j]);
+ printk("TOTAL[%10d] ", sum);
for ( j = 0; j < perfc_info[i].nr_elements; j++ )
- printk("ARR%02d[%10d 0x%08x] ",
- j, atomic_read(&counters[j]),
- atomic_read(&counters[j]));
+ printk("ARR%02d[%10d] ", j, atomic_read(&counters[j]));
counters += j;
break;
}
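/*
 * Illustrative output after this change (counter names are real, the
 * values are invented):
 *
 *   domain_page_tlb_flush            TOTAL[      1234] CPU00[       900] CPU01[       334]
 *   need_flush_tlb_flush             TOTAL[        56] CPU00[        41] CPU01[        15]
 */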
{
if ( page->type_count == 0 )
{
- page->flags &= ~(PG_type_mask | PG_need_flush);
- /* NB. This ref alone won't cause a TLB flush. */
+ page->flags &= ~PG_type_mask;
+ /* No need for PG_need_flush here. */
page->flags |= PGT_writeable_page;
}
get_page_type(page);
pfn++ )
{
page = frame_table + pfn;
- if ( writeable_buffer &&
- (put_page_type(page) == 0) &&
- (page->flags & PG_need_flush) )
- {
- __flush_tlb();
- page->flags &= ~PG_need_flush;
- }
+ if ( writeable_buffer )
+ put_page_type(page);
put_page_tot(page);
}
spin_unlock_irqrestore(&p->page_lock, flags);
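/*
 * Editorial note: the explicit __flush_tlb() that used to live here is
 * not lost; PG_need_flush stays set on the frame, and the retype path
 * earlier in this patch (the hunk marking flush_tlb[smp_processor_id()])
 * now performs or defers the flush when the frame next changes type.
 */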
*
* - flush_tlb() flushes the current mm struct TLBs
 * - flush_tlb_all() flushes all processes' TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_range(mm, start, end) flushes a range of pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
#ifndef CONFIG_SMP
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- if (mm == current->active_mm)
- __flush_tlb();
-}
-
-static inline void flush_tlb_cpu(unsigned int cpu)
-{
- __flush_tlb();
-}
-
-#if 0
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- if (vma->vm_mm == current->active_mm)
- __flush_tlb_one(addr);
-}
-#endif
-
-static inline void flush_tlb_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- if (mm == current->active_mm)
- __flush_tlb();
-}
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb_all()
+#define local_flush_tlb() __flush_tlb()
+#define flush_tlb_cpu(_cpu) __flush_tlb()
#else
#include <xeno/smp.h>
-#define local_flush_tlb() \
- __flush_tlb()
+#define flush_tlb() __flush_tlb()
+#define local_flush_tlb() __flush_tlb()
extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
-
-#define flush_tlb() flush_tlb_current_task()
-
-static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, unsigned long end)
-{
- flush_tlb_mm(mm);
-}
extern void flush_tlb_others(unsigned long cpumask);
static inline void flush_tlb_cpu(unsigned int cpu)
PERFCOUNTER( net_rx_tlbflush, "net rx tlb flushes" )
PERFCOUNTER( net_tx_transmitted, "net tx transmitted" )
-PERFCOUNTER( domain_page_tlb_flush, "domain page tlb flushes" )
+PERFCOUNTER_CPU( domain_page_tlb_flush, "domain page tlb flushes" )
+PERFCOUNTER_CPU( need_flush_tlb_flush, "PG_need_flush tlb flushes" )
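/*
 * Editorial note on the two flavours, as used in this patch (a reading
 * of the surrounding code, not a quote of perfc.h): PERFCOUNTER declares
 * a single global counter (TYPE_SINGLE in perfc_printall() above), while
 * PERFCOUNTER_CPU declares an NR_CPUS-wide array (TYPE_CPU) whose local
 * slot is bumped with perfc_incrc(), keeping hot paths such as TLB
 * flushes off shared cache lines. Usage, as seen elsewhere in the patch:
 *
 *     perfc_incrc(domain_page_tlb_flush);
 *     perfc_incrc(need_flush_tlb_flush);
 */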